Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/Makefile               |    2
 drivers/gpu/drm/drm_crtc.c             |   76
 drivers/gpu/drm/drm_crtc_helper.c      |   44
 drivers/gpu/drm/drm_drv.c              |    4
 drivers/gpu/drm/drm_edid.c             |   70
 drivers/gpu/drm/drm_encoder_slave.c    |  116
 drivers/gpu/drm/drm_modes.c            |  424
 drivers/gpu/drm/drm_sysfs.c            |   28
 drivers/gpu/drm/i915/i915_dma.c        |   35
 drivers/gpu/drm/i915/intel_lvds.c      |   22
 drivers/gpu/drm/i915/intel_sdvo.c      |   41
 drivers/gpu/drm/radeon/radeon_object.h |    1
 drivers/gpu/drm/radeon/radeon_ttm.c    |   33
 drivers/gpu/drm/ttm/ttm_bo.c           |  296
 drivers/gpu/drm/ttm/ttm_bo_util.c      |    4
 drivers/gpu/drm/ttm/ttm_global.c       |    4
 drivers/gpu/drm/ttm/ttm_memory.c       |  488
 drivers/gpu/drm/ttm/ttm_module.c       |   58
 drivers/gpu/drm/ttm/ttm_tt.c           |   37

19 files changed, 1454 insertions(+), 329 deletions(-)
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index fe23f29f7cba..5f0aec4f082a 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -11,7 +11,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
 	drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
 	drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
 	drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
-	drm_info.o drm_debugfs.o
+	drm_info.o drm_debugfs.o drm_encoder_slave.o

 drm-$(CONFIG_COMPAT) += drm_ioc32.o
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2f631c75f704..c20fcdc65497 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -68,10 +68,10 @@ DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
  */
 static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
 {
-	{ DRM_MODE_SCALE_NON_GPU, "Non-GPU" },
-	{ DRM_MODE_SCALE_FULLSCREEN, "Fullscreen" },
-	{ DRM_MODE_SCALE_NO_SCALE, "No scale" },
-	{ DRM_MODE_SCALE_ASPECT, "Aspect" },
+	{ DRM_MODE_SCALE_NONE, "None" },
+	{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
+	{ DRM_MODE_SCALE_CENTER, "Center" },
+	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
 };

 static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
@@ -108,6 +108,7 @@ static struct drm_prop_enum_list drm_tv_select_enum_list[] =
 	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
 	{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
 	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
 };

 DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
@@ -118,6 +119,7 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
 	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
 	{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
 	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
 };

 DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
@@ -146,6 +148,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 	{ DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
 	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
 	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
 };

 static struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -699,6 +702,42 @@ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
 		drm_property_add_enum(dev->mode_config.tv_mode_property, i,
 				      i, modes[i]);

+	dev->mode_config.tv_brightness_property =
+		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+				    "brightness", 2);
+	dev->mode_config.tv_brightness_property->values[0] = 0;
+	dev->mode_config.tv_brightness_property->values[1] = 100;
+
+	dev->mode_config.tv_contrast_property =
+		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+				    "contrast", 2);
+	dev->mode_config.tv_contrast_property->values[0] = 0;
+	dev->mode_config.tv_contrast_property->values[1] = 100;
+
+	dev->mode_config.tv_flicker_reduction_property =
+		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+				    "flicker reduction", 2);
+	dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
+	dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
+
+	dev->mode_config.tv_overscan_property =
+		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+				    "overscan", 2);
+	dev->mode_config.tv_overscan_property->values[0] = 0;
+	dev->mode_config.tv_overscan_property->values[1] = 100;
+
+	dev->mode_config.tv_saturation_property =
+		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+				    "saturation", 2);
+	dev->mode_config.tv_saturation_property->values[0] = 0;
+	dev->mode_config.tv_saturation_property->values[1] = 100;
+
+	dev->mode_config.tv_hue_property =
+		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+				    "hue", 2);
+	dev->mode_config.tv_hue_property->values[0] = 0;
+	dev->mode_config.tv_hue_property->values[1] = 100;
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_mode_create_tv_properties);
@@ -1044,7 +1083,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list,
 				    head) {
-			DRM_DEBUG("CRTC ID is %d\n", crtc->base.id);
+			DRM_DEBUG_KMS("CRTC ID is %d\n", crtc->base.id);
 			if (put_user(crtc->base.id, crtc_id + copied)) {
 				ret = -EFAULT;
 				goto out;
@@ -1072,7 +1111,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 		list_for_each_entry(encoder,
 				    &dev->mode_config.encoder_list,
 				    head) {
-			DRM_DEBUG("ENCODER ID is %d\n",
+			DRM_DEBUG_KMS("ENCODER ID is %d\n",
 				  encoder->base.id);
 			if (put_user(encoder->base.id, encoder_id +
 				     copied)) {
@@ -1103,7 +1142,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 		list_for_each_entry(connector,
 				    &dev->mode_config.connector_list,
 				    head) {
-			DRM_DEBUG("CONNECTOR ID is %d\n",
+			DRM_DEBUG_KMS("CONNECTOR ID is %d\n",
 				  connector->base.id);
 			if (put_user(connector->base.id,
 				     connector_id + copied)) {
@@ -1127,7 +1166,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	}
 	card_res->count_connectors = connector_count;

-	DRM_DEBUG("Counted %d %d %d\n", card_res->count_crtcs,
+	DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs,
 		  card_res->count_connectors,
 		  card_res->count_encoders);

 out:
@@ -1230,7 +1269,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,

 	memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));

-	DRM_DEBUG("connector id %d:\n", out_resp->connector_id);
+	DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id);

 	mutex_lock(&dev->mode_config.mutex);

@@ -1406,7 +1445,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	obj = drm_mode_object_find(dev, crtc_req->crtc_id,
 				   DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		DRM_DEBUG("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1419,7 +1458,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 			list_for_each_entry(crtcfb,
 					    &dev->mode_config.crtc_list, head) {
 				if (crtcfb == crtc) {
-					DRM_DEBUG("Using current fb for setmode\n");
+					DRM_DEBUG_KMS("Using current fb for "
+							"setmode\n");
 					fb = crtc->fb;
 				}
 			}
@@ -1427,7 +1467,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 			obj = drm_mode_object_find(dev, crtc_req->fb_id,
 						   DRM_MODE_OBJECT_FB);
 			if (!obj) {
-				DRM_DEBUG("Unknown FB ID%d\n", crtc_req->fb_id);
+				DRM_DEBUG_KMS("Unknown FB ID%d\n",
+						crtc_req->fb_id);
 				ret = -EINVAL;
 				goto out;
 			}
@@ -1440,13 +1481,13 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 		}

 		if (crtc_req->count_connectors == 0 && mode) {
-			DRM_DEBUG("Count connectors is 0 but mode set\n");
+			DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
 			ret = -EINVAL;
 			goto out;
 		}

 		if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
-			DRM_DEBUG("Count connectors is %d but no mode or fb set\n",
+			DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
 				  crtc_req->count_connectors);
 			ret = -EINVAL;
 			goto out;
@@ -1479,7 +1520,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 			obj = drm_mode_object_find(dev, out_id,
 						   DRM_MODE_OBJECT_CONNECTOR);
 			if (!obj) {
DRM_DEBUG("Connector id %d unknown\n", out_id); + DRM_DEBUG_KMS("Connector id %d unknown\n", + out_id); ret = -EINVAL; goto out; } @@ -1512,7 +1554,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, struct drm_crtc *crtc; int ret = 0; - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); if (!req->flags) { DRM_ERROR("no operation set\n"); @@ -1522,7 +1564,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { - DRM_DEBUG("Unknown CRTC ID %d\n", req->crtc_id); + DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id); ret = -EINVAL; goto out; } diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 6aaa2cb23365..a06c5f52c289 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -94,7 +94,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, int count = 0; int mode_flags = 0; - DRM_DEBUG("%s\n", drm_get_connector_name(connector)); + DRM_DEBUG_KMS("%s\n", drm_get_connector_name(connector)); /* set all modes to the unverified state */ list_for_each_entry_safe(mode, t, &connector->modes, head) mode->status = MODE_UNVERIFIED; @@ -102,7 +102,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, connector->status = connector->funcs->detect(connector); if (connector->status == connector_status_disconnected) { - DRM_DEBUG("%s is disconnected\n", + DRM_DEBUG_KMS("%s is disconnected\n", drm_get_connector_name(connector)); /* TODO set EDID to NULL */ return 0; @@ -138,7 +138,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, drm_mode_sort(&connector->modes); - DRM_DEBUG("Probed modes for %s\n", drm_get_connector_name(connector)); + DRM_DEBUG_KMS("Probed modes for %s\n", + drm_get_connector_name(connector)); list_for_each_entry_safe(mode, t, &connector->modes, head) { mode->vrefresh = drm_mode_vrefresh(mode); @@ -184,12 +185,13 @@ static void drm_helper_add_std_modes(struct drm_device *dev, drm_mode_list_concat(&connector->probed_modes, &connector->modes); - DRM_DEBUG("Adding mode %s to %s\n", stdmode->name, + DRM_DEBUG_KMS("Adding mode %s to %s\n", stdmode->name, drm_get_connector_name(connector)); } drm_mode_sort(&connector->modes); - DRM_DEBUG("Added std modes on %s\n", drm_get_connector_name(connector)); + DRM_DEBUG_KMS("Added std modes on %s\n", + drm_get_connector_name(connector)); list_for_each_entry_safe(mode, t, &connector->modes, head) { mode->vrefresh = drm_mode_vrefresh(mode); @@ -312,7 +314,7 @@ static void drm_enable_connectors(struct drm_device *dev, bool *enabled) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { enabled[i] = drm_connector_enabled(connector, true); - DRM_DEBUG("connector %d enabled? %s\n", connector->base.id, + DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id, enabled[i] ? "yes" : "no"); any_enabled |= enabled[i]; i++; @@ -342,7 +344,7 @@ static bool drm_target_preferred(struct drm_device *dev, continue; } - DRM_DEBUG("looking for preferred mode on connector %d\n", + DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", connector->base.id); modes[i] = drm_has_preferred_mode(connector, width, height); @@ -351,7 +353,7 @@ static bool drm_target_preferred(struct drm_device *dev, list_for_each_entry(modes[i], &connector->modes, head) break; } - DRM_DEBUG("found mode %s\n", modes[i] ? modes[i]->name : + DRM_DEBUG_KMS("found mode %s\n", modes[i] ? 
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 6aaa2cb23365..a06c5f52c289 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -94,7 +94,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	int count = 0;
 	int mode_flags = 0;

-	DRM_DEBUG("%s\n", drm_get_connector_name(connector));
+	DRM_DEBUG_KMS("%s\n", drm_get_connector_name(connector));
 	/* set all modes to the unverified state */
 	list_for_each_entry_safe(mode, t, &connector->modes, head)
 		mode->status = MODE_UNVERIFIED;
@@ -102,7 +102,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	connector->status = connector->funcs->detect(connector);

 	if (connector->status == connector_status_disconnected) {
-		DRM_DEBUG("%s is disconnected\n",
+		DRM_DEBUG_KMS("%s is disconnected\n",
 			  drm_get_connector_name(connector));
 		/* TODO set EDID to NULL */
 		return 0;
@@ -138,7 +138,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,

 	drm_mode_sort(&connector->modes);

-	DRM_DEBUG("Probed modes for %s\n", drm_get_connector_name(connector));
+	DRM_DEBUG_KMS("Probed modes for %s\n",
+			drm_get_connector_name(connector));
 	list_for_each_entry_safe(mode, t, &connector->modes, head) {
 		mode->vrefresh = drm_mode_vrefresh(mode);
@@ -184,12 +185,13 @@ static void drm_helper_add_std_modes(struct drm_device *dev,
 			drm_mode_list_concat(&connector->probed_modes,
 					     &connector->modes);

-			DRM_DEBUG("Adding mode %s to %s\n", stdmode->name,
+			DRM_DEBUG_KMS("Adding mode %s to %s\n", stdmode->name,
 				  drm_get_connector_name(connector));
 		}
 	drm_mode_sort(&connector->modes);

-	DRM_DEBUG("Added std modes on %s\n", drm_get_connector_name(connector));
+	DRM_DEBUG_KMS("Added std modes on %s\n",
+			drm_get_connector_name(connector));
 	list_for_each_entry_safe(mode, t, &connector->modes, head) {
 		mode->vrefresh = drm_mode_vrefresh(mode);
@@ -312,7 +314,7 @@ static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		enabled[i] = drm_connector_enabled(connector, true);
-		DRM_DEBUG("connector %d enabled? %s\n", connector->base.id,
+		DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
 			  enabled[i] ? "yes" : "no");
 		any_enabled |= enabled[i];
 		i++;
@@ -342,7 +344,7 @@ static bool drm_target_preferred(struct drm_device *dev,
 			continue;
 		}

-		DRM_DEBUG("looking for preferred mode on connector %d\n",
+		DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
 			  connector->base.id);

 		modes[i] = drm_has_preferred_mode(connector, width, height);
@@ -351,7 +353,7 @@ static bool drm_target_preferred(struct drm_device *dev,
 			list_for_each_entry(modes[i], &connector->modes, head)
 				break;
 		}
-		DRM_DEBUG("found mode %s\n", modes[i] ? modes[i]->name :
+		DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
 			  "none");
 		i++;
 	}
@@ -452,7 +454,7 @@ static void drm_setup_crtcs(struct drm_device *dev)
 	int width, height;
 	int i, ret;

-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");

 	width = dev->mode_config.max_width;
 	height = dev->mode_config.max_height;
@@ -475,7 +477,7 @@ static void drm_setup_crtcs(struct drm_device *dev)
 	if (!ret)
 		DRM_ERROR("Unable to find initial modes\n");

-	DRM_DEBUG("picking CRTCs for %dx%d config\n", width, height);
+	DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);

 	drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
@@ -490,7 +492,7 @@ static void drm_setup_crtcs(struct drm_device *dev)
 		}

 		if (mode && crtc) {
-			DRM_DEBUG("desired mode %s set on crtc %d\n",
+			DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
 				  mode->name, crtc->base.id);
 			crtc->desired_mode = mode;
 			connector->encoder->crtc = crtc;
@@ -713,7 +715,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	struct drm_crtc_helper_funcs *crtc_funcs;
 	int ret = 0;

-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");

 	if (!set)
 		return -EINVAL;
@@ -726,7 +728,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)

 	crtc_funcs = set->crtc->helper_private;

-	DRM_DEBUG("crtc: %p %d fb: %p connectors: %p num_connectors: %d (x, y) (%i, %i)\n",
+	DRM_DEBUG_KMS("crtc: %p %d fb: %p connectors: %p num_connectors:"
+			" %d (x, y) (%i, %i)\n",
 		  set->crtc, set->crtc->base.id, set->fb, set->connectors,
 		  (int)set->num_connectors, set->x, set->y);
@@ -756,7 +759,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	if (set->crtc->fb != set->fb) {
 		/* If we have no fb then treat it as a full mode set */
 		if (set->crtc->fb == NULL) {
-			DRM_DEBUG("crtc has no fb, full mode set\n");
+			DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
 			mode_changed = true;
 		} else if (set->fb == NULL) {
 			mode_changed = true;
@@ -772,7 +775,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		fb_changed = true;

 	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
-		DRM_DEBUG("modes are different, full mode set\n");
+		DRM_DEBUG_KMS("modes are different, full mode set\n");
 		drm_mode_debug_printmodeline(&set->crtc->mode);
 		drm_mode_debug_printmodeline(set->mode);
 		mode_changed = true;
@@ -798,7 +801,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		}

 		if (new_encoder != connector->encoder) {
-			DRM_DEBUG("encoder changed, full mode switch\n");
+			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
 			mode_changed = true;
 			connector->encoder = new_encoder;
 		}
@@ -833,11 +836,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 			goto fail_set_mode;
 		}
 		if (new_crtc != connector->encoder->crtc) {
-			DRM_DEBUG("crtc changed, full mode switch\n");
+			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
 			mode_changed = true;
 			connector->encoder->crtc = new_crtc;
 		}
-		DRM_DEBUG("setting connector %d crtc to %p\n",
+		DRM_DEBUG_KMS("setting connector %d crtc to %p\n",
 			  connector->base.id, new_crtc);
 	}
@@ -850,7 +853,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		set->crtc->fb = set->fb;
 		set->crtc->enabled = (set->mode != NULL);
 		if (set->mode != NULL) {
-			DRM_DEBUG("attempting to set mode from userspace\n");
+			DRM_DEBUG_KMS("attempting to set mode from"
+					" userspace\n");
 			drm_mode_debug_printmodeline(set->mode);
 			if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
 						      set->x, set->y,
@@ -903,7 +907,7 @@ EXPORT_SYMBOL(drm_crtc_helper_set_config);

 bool drm_helper_plugged_event(struct drm_device *dev)
 {
-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");

 	drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
					 dev->mode_config.max_height);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b39d7bfc0c9c..a75ca63deea6 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -63,12 +63,12 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),

 	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),

 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7f2728bbc16c..a1cab5de2f4b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -61,6 +61,10 @@
 /* use +hsync +vsync for detailed mode */
 #define EDID_QUIRK_DETAILED_SYNC_PP		(1 << 6)

+#define LEVEL_DMT	0
+#define LEVEL_GTF	1
+#define LEVEL_CVT	2
+
 static struct edid_quirk {
 	char *vendor;
 	int product_id;
@@ -240,25 +244,31 @@ static void edid_fixup_preferred(struct drm_connector *connector,
 /**
  * drm_mode_std - convert standard mode info (width, height, refresh) into mode
  * @t: standard timing params
+ * @timing_level: standard timing level
  *
  * Take the standard timing params (in this case width, aspect, and refresh)
- * and convert them into a real mode using CVT.
+ * and convert them into a real mode using CVT/GTF/DMT.
  *
  * Punts for now, but should eventually use the FB layer's CVT based mode
  * generation code.
  */
 struct drm_display_mode *drm_mode_std(struct drm_device *dev,
-				      struct std_timing *t)
+				      struct std_timing *t,
+				      int timing_level)
 {
 	struct drm_display_mode *mode;
-	int hsize = t->hsize * 8 + 248, vsize;
+	int hsize, vsize;
+	int vrefresh_rate;
 	unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
 		>> EDID_TIMING_ASPECT_SHIFT;
-
-	mode = drm_mode_create(dev);
-	if (!mode)
-		return NULL;
-
+	unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
+		>> EDID_TIMING_VFREQ_SHIFT;
+
+	/* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
+	hsize = t->hsize * 8 + 248;
+	/* vrefresh_rate = vfreq + 60 */
+	vrefresh_rate = vfreq + 60;
+	/* the vdisplay is calculated based on the aspect ratio */
 	if (aspect_ratio == 0)
 		vsize = (hsize * 10) / 16;
 	else if (aspect_ratio == 1)
@@ -268,8 +278,23 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 	else
 		vsize = (hsize * 9) / 16;

-	drm_mode_set_name(mode);
-
+	mode = NULL;
+	switch (timing_level) {
+	case LEVEL_DMT:
+		mode = drm_mode_create(dev);
+		if (mode) {
+			mode->hdisplay = hsize;
+			mode->vdisplay = vsize;
+			drm_mode_set_name(mode);
+		}
+		break;
+	case LEVEL_GTF:
+		mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+		break;
+	case LEVEL_CVT:
+		mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+		break;
+	}
 	return mode;
 }

@@ -451,6 +476,19 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e
 	return modes;
 }
+/**
+ * standard_timing_level - get std. timing level (CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+	if (edid->revision >= 2) {
+		if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+			return LEVEL_CVT;
+		return LEVEL_GTF;
+	}
+	return LEVEL_DMT;
+}

 /**
  * add_standard_modes - get std. modes from EDID and add them
@@ -463,6 +501,9 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
 {
 	struct drm_device *dev = connector->dev;
 	int i, modes = 0;
+	int timing_level;
+
+	timing_level = standard_timing_level(edid);

 	for (i = 0; i < EDID_STD_TIMINGS; i++) {
 		struct std_timing *t = &edid->standard_timings[i];
@@ -472,7 +513,8 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
 		if (t->hsize == 1 && t->vfreq_aspect == 1)
 			continue;

-		newmode = drm_mode_std(dev, &edid->standard_timings[i]);
+		newmode = drm_mode_std(dev, &edid->standard_timings[i],
+				       timing_level);
 		if (newmode) {
 			drm_mode_probed_add(connector, newmode);
 			modes++;
@@ -496,6 +538,9 @@ static int add_detailed_info(struct drm_connector *connector,
 {
 	struct drm_device *dev = connector->dev;
 	int i, j, modes = 0;
+	int timing_level;
+
+	timing_level = standard_timing_level(edid);

 	for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
 		struct detailed_timing *timing = &edid->detailed_timings[i];
@@ -525,7 +570,8 @@ static int add_detailed_info(struct drm_connector *connector,
 				struct drm_display_mode *newmode;

 				std = &data->data.timings[j];
-				newmode = drm_mode_std(dev, std);
+				newmode = drm_mode_std(dev, std,
+						       timing_level);
 				if (newmode) {
 					drm_mode_probed_add(connector, newmode);
 					modes++;
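The net effect of standard_timing_level(): EDID 1.0 and 1.1 blocks fall back to DMT, 1.2 and 1.3 use GTF, and a 1.4 block advertising the default-GTF feature bit is promoted to CVT. The descriptor decode that feeds drm_mode_std() can be sketched standalone as follows (illustrative only; the raw byte arguments are an assumption about the two-byte standard timing layout):

```c
/* Sketch of the EDID standard-timing decode used above: hdisplay is
 * hsize * 8 + 248, refresh is the low six bits plus 60, and vdisplay
 * follows from the two aspect-ratio bits. */
static void example_decode_std_timing(u8 hsize_byte, u8 vfreq_aspect)
{
	int hsize = hsize_byte * 8 + 248;
	int vrefresh = (vfreq_aspect & 0x3f) + 60;
	int vsize;

	switch (vfreq_aspect >> 6) {
	case 0:			/* 16:10 */
		vsize = hsize * 10 / 16;
		break;
	case 1:			/* 4:3 */
		vsize = hsize * 3 / 4;
		break;
	case 2:			/* 5:4 */
		vsize = hsize * 4 / 5;
		break;
	default:		/* 16:9 */
		vsize = hsize * 9 / 16;
		break;
	}
	DRM_DEBUG_KMS("std timing %dx%d@%dHz\n", hsize, vsize, vrefresh);
}
```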
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
new file mode 100644
index 000000000000..f0184696edf3
--- /dev/null
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drm_encoder_slave.h"
+
+/**
+ * drm_i2c_encoder_init - Initialize an I2C slave encoder
+ * @dev:	DRM device.
+ * @encoder:	Encoder to be attached to the I2C device. You aren't
+ *		required to have called drm_encoder_init() before.
+ * @adap:	I2C adapter that will be used to communicate with
+ *		the device.
+ * @info:	Information that will be used to create the I2C device.
+ *		Required fields are @addr and @type.
+ *
+ * Create an I2C device on the specified bus (the module containing its
+ * driver is transparently loaded) and attach it to the specified
+ * &drm_encoder_slave. The @slave_funcs field will be initialized with
+ * the hooks provided by the slave driver.
+ *
+ * Returns 0 on success or a negative errno on failure, in particular,
+ * -ENODEV is returned when no matching driver is found.
+ */
+int drm_i2c_encoder_init(struct drm_device *dev,
+			 struct drm_encoder_slave *encoder,
+			 struct i2c_adapter *adap,
+			 const struct i2c_board_info *info)
+{
+	char modalias[sizeof(I2C_MODULE_PREFIX)
+		      + I2C_NAME_SIZE];
+	struct module *module = NULL;
+	struct i2c_client *client;
+	struct drm_i2c_encoder_driver *encoder_drv;
+	int err = 0;
+
+	snprintf(modalias, sizeof(modalias),
+		 "%s%s", I2C_MODULE_PREFIX, info->type);
+	request_module(modalias);
+
+	client = i2c_new_device(adap, info);
+	if (!client) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	if (!client->driver) {
+		err = -ENODEV;
+		goto fail_unregister;
+	}
+
+	module = client->driver->driver.owner;
+	if (!try_module_get(module)) {
+		err = -ENODEV;
+		goto fail_unregister;
+	}
+
+	encoder->bus_priv = client;
+
+	encoder_drv = to_drm_i2c_encoder_driver(client->driver);
+
+	err = encoder_drv->encoder_init(client, dev, encoder);
+	if (err)
+		goto fail_unregister;
+
+	return 0;
+
+fail_unregister:
+	i2c_unregister_device(client);
+	module_put(module);
+fail:
+	return err;
+}
+EXPORT_SYMBOL(drm_i2c_encoder_init);
+
+/**
+ * drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder
+ * @drm_encoder:	Encoder to be unregistered.
+ *
+ * This should be called from the @destroy method of an I2C slave
+ * encoder driver once I2C access is no longer needed.
+ */
+void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
+{
+	struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
+	struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
+	struct module *module = client->driver->driver.owner;
+
+	i2c_unregister_device(client);
+	encoder->bus_priv = NULL;
+
+	module_put(module);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_destroy);
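A slave encoder driver consumes this API roughly as follows; the CH7006 chip name and 0x75 address are only an example of a plausible I2C TV encoder, not something this patch wires up:

```c
/* Hypothetical caller: bind an I2C TV-encoder chip to a slave encoder.
 * drm_i2c_encoder_init() loads the module behind "i2c:ch7006" (if any),
 * instantiates the client and fills encoder->slave_funcs. */
static int example_attach_slave_encoder(struct drm_device *dev,
					struct drm_encoder_slave *encoder,
					struct i2c_adapter *adap)
{
	const struct i2c_board_info info = {
		I2C_BOARD_INFO("ch7006", 0x75),	/* example chip/address */
	};

	return drm_i2c_encoder_init(dev, encoder, adap, &info);
}
```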
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 7914097b09c6..ab6e70eadc58 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -8,6 +8,8 @@
  * Copyright © 2007 Dave Airlie
  * Copyright © 2007-2008 Intel Corporation
  *   Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2005-2006 Luc Verhaegen
+ * Copyright (c) 2001, Andy Ritger  aritger@nvidia.com
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -38,7 +40,6 @@
 #include "drm.h"
 #include "drm_crtc.h"

-#define DRM_MODESET_DEBUG "drm_mode"
 /**
  * drm_mode_debug_printmodeline - debug print a mode
  * @dev: DRM device
@@ -51,8 +52,8 @@
  */
 void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
 {
-	DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
-		"Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
+	DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
+			"0x%x 0x%x\n",
 		mode->base.id, mode->name, mode->vrefresh, mode->clock,
 		mode->hdisplay, mode->hsync_start,
 		mode->hsync_end, mode->htotal,
@@ -62,6 +63,420 @@ void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
 EXPORT_SYMBOL(drm_mode_debug_printmodeline);

 /**
+ * drm_cvt_mode - create a modeline based on the CVT algorithm
+ * @dev: DRM device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @reduced: whether to use reduced blanking
+ * @interlaced: whether the interlace is supported
+ *
+ * LOCKING:
+ * none.
+ *
+ * Return the modeline based on the CVT algorithm.
+ *
+ * This function is called to generate the modeline based on the CVT
+ * algorithm according to the hdisplay, vdisplay, vrefresh.
+ * It is based on the VESA(TM) Coordinated Video Timing Generator by
+ * Graham Loveridge April 9, 2003 available at
+ * http://www.vesa.org/public/CVT/CVTd6r1.xls
+ *
+ * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
+ * What I have done is to translate it by using integer calculation.
+ */
+#define HV_FACTOR	1000
+struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
+				      int vdisplay, int vrefresh,
+				      bool reduced, bool interlaced)
+{
+	/* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define	CVT_MARGIN_PERCENTAGE	18
+	/* 2) character cell horizontal granularity (pixels) - default 8 */
+#define	CVT_H_GRANULARITY	8
+	/* 3) Minimum vertical porch (lines) - default 3 */
+#define	CVT_MIN_V_PORCH		3
+	/* 4) Minimum number of vertical back porch lines - default 6 */
+#define	CVT_MIN_V_BPORCH	6
+	/* Pixel Clock step (kHz) */
+#define CVT_CLOCK_STEP		250
+	struct drm_display_mode *drm_mode;
+	bool margins = false;
+	unsigned int vfieldrate, hperiod;
+	int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
+	int interlace;
+
+	/* allocate the drm_display_mode structure. If failure, we will
+	 * return directly
+	 */
+	drm_mode = drm_mode_create(dev);
+	if (!drm_mode)
+		return NULL;
+
+	/* the CVT default refresh rate is 60Hz */
+	if (!vrefresh)
+		vrefresh = 60;
+
+	/* the required field refresh rate */
+	if (interlaced)
+		vfieldrate = vrefresh * 2;
+	else
+		vfieldrate = vrefresh;
+
+	/* horizontal pixels */
+	hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
+
+	/* determine the left&right borders */
+	hmargin = 0;
+	if (margins) {
+		hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+		hmargin -= hmargin % CVT_H_GRANULARITY;
+	}
+	/* find the total active pixels */
+	drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
+
+	/* find the number of lines per field */
+	if (interlaced)
+		vdisplay_rnd = vdisplay / 2;
+	else
+		vdisplay_rnd = vdisplay;
+
+	/* find the top & bottom borders */
+	vmargin = 0;
+	if (margins)
+		vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+
+	drm_mode->vdisplay = vdisplay + 2 * vmargin;
+
+	/* Interlaced */
+	if (interlaced)
+		interlace = 1;
+	else
+		interlace = 0;
+
+	/* Determine VSync Width from aspect ratio */
+	if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
+		vsync = 4;
+	else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
+		vsync = 5;
+	else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
+		vsync = 6;
+	else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
+		vsync = 7;
+	else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
+		vsync = 7;
+	else /* custom */
+		vsync = 10;
+
+	if (!reduced) {
+		/* simplify the GTF calculation */
+		/* 4) Minimum time of vertical sync + back porch interval (µs)
+		 * default 550.0
+		 */
+		int tmp1, tmp2;
+#define CVT_MIN_VSYNC_BP	550
+		/* 3) Nominal HSync width (% of line period) - default 8 */
+#define CVT_HSYNC_PERCENTAGE	8
+		unsigned int hblank_percentage;
+		int vsyncandback_porch, vback_porch, hblank;
+
+		/* estimate the horizontal period */
+		tmp1 = HV_FACTOR * 1000000 -
+				CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
+		tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
+				interlace;
+		hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
+
+		tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
+		/* 9. Find number of lines in sync + backporch */
+		if (tmp1 < (vsync + CVT_MIN_V_PORCH))
+			vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
+		else
+			vsyncandback_porch = tmp1;
+		/* 10. Find number of lines in back porch */
+		vback_porch = vsyncandback_porch - vsync;
+		drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
+				vsyncandback_porch + CVT_MIN_V_PORCH;
+		/* 5) Definition of Horizontal blanking time limitation */
+		/* Gradient (%/kHz) - default 600 */
+#define CVT_M_FACTOR	600
+		/* Offset (%) - default 40 */
+#define CVT_C_FACTOR	40
+		/* Blanking time scaling factor - default 128 */
+#define CVT_K_FACTOR	128
+		/* Scaling factor weighting - default 20 */
+#define CVT_J_FACTOR	20
+#define CVT_M_PRIME	(CVT_M_FACTOR * CVT_K_FACTOR / 256)
+#define CVT_C_PRIME	((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
+			 CVT_J_FACTOR)
+		/* 12. Find ideal blanking duty cycle from formula */
+		hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
+					hperiod / 1000;
+		/* 13. Blanking time */
+		if (hblank_percentage < 20 * HV_FACTOR)
+			hblank_percentage = 20 * HV_FACTOR;
+		hblank = drm_mode->hdisplay * hblank_percentage /
+			 (100 * HV_FACTOR - hblank_percentage);
+		hblank -= hblank % (2 * CVT_H_GRANULARITY);
+		/* 14. find the total pixels per line */
+		drm_mode->htotal = drm_mode->hdisplay + hblank;
+		drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
+		drm_mode->hsync_start = drm_mode->hsync_end -
+			(drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
+		drm_mode->hsync_start += CVT_H_GRANULARITY -
+			drm_mode->hsync_start % CVT_H_GRANULARITY;
+		/* fill the Vsync values */
+		drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
+		drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+	} else {
+		/* Reduced blanking */
+		/* Minimum vertical blanking interval time (µs) - default 460 */
+#define CVT_RB_MIN_VBLANK	460
+		/* Fixed number of clocks for horizontal sync */
+#define CVT_RB_H_SYNC		32
+		/* Fixed number of clocks for horizontal blanking */
+#define CVT_RB_H_BLANK		160
+		/* Fixed number of lines for vertical front porch - default 3 */
+#define CVT_RB_VFPORCH		3
+		int vbilines;
+		int tmp1, tmp2;
+		/* 8. Estimate Horizontal period. */
+		tmp1 = HV_FACTOR * 1000000 -
+			CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
+		tmp2 = vdisplay_rnd + 2 * vmargin;
+		hperiod = tmp1 / (tmp2 * vfieldrate);
+		/* 9. Find number of lines in vertical blanking */
+		vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
+		/* 10. Check if vertical blanking is sufficient */
+		if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
+			vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
+		/* 11. Find total number of lines in vertical field */
+		drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
+		/* 12. Find total number of pixels in a line */
+		drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
+		/* Fill in HSync values */
+		drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
+		drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+	}
+	/* 15/13. Find pixel clock frequency (kHz for xf86) */
+	drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
+	drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+	/* 18/16. Find actual vertical frame frequency */
+	/* ignore - just set the mode flag for interlaced */
+	if (interlaced)
+		drm_mode->vtotal *= 2;
+	/* Fill the mode line name */
+	drm_mode_set_name(drm_mode);
+	if (reduced)
+		drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
+					DRM_MODE_FLAG_NVSYNC);
+	else
+		drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
+					DRM_MODE_FLAG_NHSYNC);
+	if (interlaced)
+		drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+	return drm_mode;
+}
+EXPORT_SYMBOL(drm_cvt_mode);
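Driver usage is a one-liner per requested mode; a minimal sketch of probing a CVT mode for a connector (the function name and the 1024x768@60 request are illustrative):

```c
/* Sketch: generate a 1024x768@60 CVT mode with normal (non-reduced)
 * blanking, progressive scan, and hand it to the probe list. */
static int example_add_cvt_mode(struct drm_device *dev,
				struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_cvt_mode(dev, 1024, 768, 60, false, false);
	if (!mode)
		return 0;
	drm_mode_probed_add(connector, mode);
	return 1;
}
```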
+
+/**
+ * drm_gtf_mode - create the modeline based on the GTF algorithm
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @interlaced: whether the interlace is supported
+ * @margins: whether the margin is supported
+ *
+ * LOCKING:
+ * none.
+ *
+ * Return the modeline based on the GTF algorithm.
+ *
+ * This function is to create the modeline based on the GTF algorithm.
+ * Generalized Timing Formula is derived from:
+ *	GTF Spreadsheet by Andy Morrish (1/5/97)
+ *	available at http://www.vesa.org
+ *
+ * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
+ * What I have done is to translate it by using integer calculation.
+ * I also refer to the function of fb_get_mode in the file of
+ * drivers/video/fbmon.c
+ */
+struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
+				      int vdisplay, int vrefresh,
+				      bool interlaced, int margins)
+{
+	/* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define	GTF_MARGIN_PERCENTAGE		18
+	/* 2) character cell horizontal granularity (pixels) - default 8 */
+#define	GTF_CELL_GRAN			8
+	/* 3) Minimum vertical porch (lines) - default 3 */
+#define	GTF_MIN_V_PORCH			1
+	/* width of vsync in lines */
+#define V_SYNC_RQD			3
+	/* width of hsync as % of total line */
+#define H_SYNC_PERCENT			8
+	/* min time of vsync + back porch (microsec) */
+#define MIN_VSYNC_PLUS_BP		550
+	/* blanking formula gradient */
+#define GTF_M				600
+	/* blanking formula offset */
+#define GTF_C				40
+	/* blanking formula scaling factor */
+#define GTF_K				128
+	/* blanking formula scaling factor */
+#define GTF_J				20
+	/* C' and M' are part of the Blanking Duty Cycle computation */
+#define GTF_C_PRIME	(((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
+#define GTF_M_PRIME	(GTF_K * GTF_M / 256)
+	struct drm_display_mode *drm_mode;
+	unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
+	int top_margin, bottom_margin;
+	int interlace;
+	unsigned int hfreq_est;
+	int vsync_plus_bp, vback_porch;
+	unsigned int vtotal_lines, vfieldrate_est, hperiod;
+	unsigned int vfield_rate, vframe_rate;
+	int left_margin, right_margin;
+	unsigned int total_active_pixels, ideal_duty_cycle;
+	unsigned int hblank, total_pixels, pixel_freq;
+	int hsync, hfront_porch, vodd_front_porch_lines;
+	unsigned int tmp1, tmp2;
+
+	drm_mode = drm_mode_create(dev);
+	if (!drm_mode)
+		return NULL;
+
+	/* 1. In order to give correct results, the number of horizontal
+	 * pixels requested is first processed to ensure that it is divisible
+	 * by the character size, by rounding it to the nearest character
+	 * cell boundary:
+	 */
+	hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+	hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
+
+	/* 2. If interlace is requested, the number of vertical lines assumed
+	 * by the calculation must be halved, as the computation calculates
+	 * the number of vertical lines per field.
+	 */
+	if (interlaced)
+		vdisplay_rnd = vdisplay / 2;
+	else
+		vdisplay_rnd = vdisplay;
+
+	/* 3. Find the frame rate required: */
+	if (interlaced)
+		vfieldrate_rqd = vrefresh * 2;
+	else
+		vfieldrate_rqd = vrefresh;
+
+	/* 4. Find number of lines in Top margin: */
+	top_margin = 0;
+	if (margins)
+		top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+				1000;
+	/* 5. Find number of lines in bottom margin: */
+	bottom_margin = top_margin;
+
+	/* 6. If interlace is required, then set variable interlace: */
+	if (interlaced)
+		interlace = 1;
+	else
+		interlace = 0;
+
+	/* 7. Estimate the Horizontal frequency */
+	{
+		tmp1 = (1000000 - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
+		tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
+				2 + interlace;
+		hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
+	}
+
+	/* 8. Find the number of lines in V sync + back porch */
+	/* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
+	vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
+	vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
+	/* 9. Find the number of lines in V back porch alone: */
+	vback_porch = vsync_plus_bp - V_SYNC_RQD;
+	/* 10. Find the total number of lines in Vertical field period: */
+	vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
+			vsync_plus_bp + GTF_MIN_V_PORCH;
+	/* 11. Estimate the Vertical field frequency: */
+	vfieldrate_est = hfreq_est / vtotal_lines;
+	/* 12. Find the actual horizontal period: */
+	hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
+
+	/* 13. Find the actual Vertical field frequency: */
+	vfield_rate = hfreq_est / vtotal_lines;
+	/* 14. Find the Vertical frame frequency: */
+	if (interlaced)
+		vframe_rate = vfield_rate / 2;
+	else
+		vframe_rate = vfield_rate;
+	/* 15. Find number of pixels in left margin: */
+	if (margins)
+		left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+				1000;
+	else
+		left_margin = 0;
+
+	/* 16. Find number of pixels in right margin: */
+	right_margin = left_margin;
+	/* 17. Find total number of active pixels in image and left and right */
+	total_active_pixels = hdisplay_rnd + left_margin + right_margin;
+	/* 18. Find the ideal blanking duty cycle from blanking duty cycle */
+	ideal_duty_cycle = GTF_C_PRIME * 1000 -
+				(GTF_M_PRIME * 1000000 / hfreq_est);
+	/* 19. Find the number of pixels in the blanking time to the nearest
+	 * double character cell: */
+	hblank = total_active_pixels * ideal_duty_cycle /
+			(100000 - ideal_duty_cycle);
+	hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
+	hblank = hblank * 2 * GTF_CELL_GRAN;
+	/* 20. Find total number of pixels: */
+	total_pixels = total_active_pixels + hblank;
+	/* 21. Find pixel clock frequency: */
+	pixel_freq = total_pixels * hfreq_est / 1000;
+	/* Stage 1 computations are now complete; I should really pass
+	 * the results to another function and do the Stage 2 computations,
+	 * but I only need a few more values so I'll just append the
+	 * computations here for now */
+	/* 17. Find the number of pixels in the horizontal sync period: */
+	hsync = H_SYNC_PERCENT * total_pixels / 100;
+	hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+	hsync = hsync * GTF_CELL_GRAN;
+	/* 18. Find the number of pixels in horizontal front porch period */
+	hfront_porch = hblank / 2 - hsync;
+	/* 36. Find the number of lines in the odd front porch period: */
+	vodd_front_porch_lines = GTF_MIN_V_PORCH;
+
+	/* finally, pack the results in the mode struct */
+	drm_mode->hdisplay = hdisplay_rnd;
+	drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
+	drm_mode->hsync_end = drm_mode->hsync_start + hsync;
+	drm_mode->htotal = total_pixels;
+	drm_mode->vdisplay = vdisplay_rnd;
+	drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
+	drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
+	drm_mode->vtotal = vtotal_lines;
+
+	drm_mode->clock = pixel_freq;
+
+	drm_mode_set_name(drm_mode);
+	drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+
+	if (interlaced) {
+		drm_mode->vtotal *= 2;
+		drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	}
+
+	return drm_mode;
+}
+EXPORT_SYMBOL(drm_gtf_mode);
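With the defaults above the blanking constants work out to C' = (40 − 20) × 128 / 256 + 20 = 30 and M' = 128 × 600 / 256 = 300, matching the GTF spreadsheet. Calling the generator mirrors drm_cvt_mode(); this sketch (the function name and the 1280x1024@75 request are illustrative) also shows the int margins parameter:

```c
/* Sketch: generate a 1280x1024@75 GTF mode, progressive, no margins. */
static int example_add_gtf_mode(struct drm_device *dev,
				struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_gtf_mode(dev, 1280, 1024, 75, false, 0);
	if (!mode)
		return 0;
	drm_mode_probed_add(connector, mode);
	return 1;
}
```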
+
+/**
  * drm_mode_set_name - set the name on a mode
  * @mode: name will be set in this mode
  *
@@ -403,8 +818,7 @@ void drm_mode_prune_invalid(struct drm_device *dev,
 			list_del(&mode->head);
 			if (verbose) {
 				drm_mode_debug_printmodeline(mode);
-				DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
-					       "Not using %s mode %d\n",
+				DRM_DEBUG_KMS("Not using %s mode %d\n",
 					      mode->name, mode->status);
 			}
 			drm_mode_destroy(dev, mode);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 85ec31b3ff00..de154556c405 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -16,6 +16,7 @@
 #include <linux/kdev_t.h>
 #include <linux/err.h>

+#include "drm_sysfs.h"
 #include "drm_core.h"
 #include "drmP.h"

@@ -247,6 +248,7 @@ static ssize_t subconnector_show(struct device *device,
 	case DRM_MODE_CONNECTOR_Composite:
 	case DRM_MODE_CONNECTOR_SVIDEO:
 	case DRM_MODE_CONNECTOR_Component:
+	case DRM_MODE_CONNECTOR_TV:
 		prop = dev->mode_config.tv_subconnector_property;
 		is_tv = 1;
 		break;
@@ -287,6 +289,7 @@ static ssize_t select_subconnector_show(struct device *device,
 	case DRM_MODE_CONNECTOR_Composite:
 	case DRM_MODE_CONNECTOR_SVIDEO:
 	case DRM_MODE_CONNECTOR_Component:
+	case DRM_MODE_CONNECTOR_TV:
 		prop = dev->mode_config.tv_select_subconnector_property;
 		is_tv = 1;
 		break;
@@ -385,6 +388,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
 	case DRM_MODE_CONNECTOR_Composite:
 	case DRM_MODE_CONNECTOR_SVIDEO:
 	case DRM_MODE_CONNECTOR_Component:
+	case DRM_MODE_CONNECTOR_TV:
 		for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
 			ret = device_create_file(&connector->kdev,
 						 &connector_attrs_opt1[i]);
 			if (ret)
@@ -512,3 +516,27 @@ void drm_sysfs_device_remove(struct drm_minor *minor)
 {
 	device_unregister(&minor->kdev);
 }
+
+
+/**
+ * drm_class_device_register - Register a struct device in the drm class.
+ *
+ * @dev: pointer to struct device to register.
+ *
+ * @dev should have all relevant members pre-filled with the exception
+ * of the class member. In particular, the device_type member must
+ * be set.
+ */
+
+int drm_class_device_register(struct device *dev)
+{
+	dev->class = drm_class;
+	return device_register(dev);
+}
+EXPORT_SYMBOL_GPL(drm_class_device_register);
+
+void drm_class_device_unregister(struct device *dev)
+{
+	return device_unregister(dev);
+}
+EXPORT_SYMBOL_GPL(drm_class_device_unregister);
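A caller of the new helper pre-fills everything except the class member, as the comment requires; a minimal sketch (the device name and device_type here are made up for illustration):

```c
/* Hypothetical user of drm_class_device_register(): a bare struct
 * device that should appear under the DRM class in sysfs. */
static struct device_type example_dev_type = {
	.name = "example",
};

static struct device example_dev;

static int example_register_class_device(struct device *parent)
{
	example_dev.parent = parent;
	example_dev.type = &example_dev_type;	/* must be set */
	dev_set_name(&example_dev, "example0");
	return drm_class_device_register(&example_dev);
}
```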
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 50d1f782768c..544d889b9b16 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -33,8 +33,6 @@
 #include "i915_drm.h"
 #include "i915_drv.h"

-#define I915_DRV	"i915_drv"
-
 /* Really want an OS-independent resettable timer.  Would like to have
  * this loop run for (eg) 3 sec, but have the timer reset every time
  * the head pointer changes, so that EBUSY only happens if the ring
@@ -101,7 +99,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);

-	DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
+	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 	return 0;
 }

@@ -187,8 +185,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 		master_priv->sarea_priv = (drm_i915_sarea_t *)
 			((u8 *)master_priv->sarea->handle +
 			 init->sarea_priv_offset);
 	} else {
-		DRM_DEBUG_DRIVER(I915_DRV,
-				"sarea not found assuming DRI2 userspace\n");
+		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
 	}

 	if (init->ring_size != 0) {
@@ -238,7 +235,7 @@ static int i915_dma_resume(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

-	DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
+	DRM_DEBUG_DRIVER("%s\n", __func__);

 	if (dev_priv->ring.map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
@@ -251,14 +248,14 @@ static int i915_dma_resume(struct drm_device * dev)
 		DRM_ERROR("Can not find hardware status page\n");
 		return -EINVAL;
 	}
-	DRM_DEBUG_DRIVER(I915_DRV, "hw status page @ %p\n",
+	DRM_DEBUG_DRIVER("hw status page @ %p\n",
 				dev_priv->hw_status_page);

 	if (dev_priv->status_gfx_addr != 0)
 		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	else
 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
-	DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
+	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

 	return 0;
 }
@@ -552,7 +549,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	if (!master_priv->sarea_priv)
 		return -EINVAL;

-	DRM_DEBUG_DRIVER(I915_DRV, "%s: page=%d pfCurrentPage=%d\n",
+	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
 			  __func__,
 			  dev_priv->current_page,
 			  master_priv->sarea_priv->pf_current_page);
@@ -633,8 +630,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}

-	DRM_DEBUG_DRIVER(I915_DRV,
-			"i915 batchbuffer, start %x used %d cliprects %d\n",
+	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
 			batch->start, batch->used, batch->num_cliprects);

 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -681,8 +677,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	void *batch_data;
 	int ret;

-	DRM_DEBUG_DRIVER(I915_DRV,
-			"i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
 			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -735,7 +730,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data,
 {
 	int ret;

-	DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
+	DRM_DEBUG_DRIVER("%s\n", __func__);

 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

@@ -778,7 +773,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
 		break;
 	default:
-		DRM_DEBUG_DRIVER(I915_DRV, "Unknown parameter %d\n",
+		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 					param->param);
 		return -EINVAL;
 	}
@@ -819,7 +814,7 @@ static int i915_setparam(struct drm_device *dev, void *data,
 		dev_priv->fence_reg_start = param->value;
 		break;
 	default:
-		DRM_DEBUG_DRIVER(I915_DRV, "unknown parameter %d\n",
+		DRM_DEBUG_DRIVER("unknown parameter %d\n",
 					param->param);
 		return -EINVAL;
 	}
@@ -846,7 +841,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 		return 0;
 	}

-	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
+	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

 	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

@@ -868,9 +863,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);

-	DRM_DEBUG_DRIVER(I915_DRV, "load hws HWS_PGA with gfx mem 0x%x\n",
+	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
 			dev_priv->status_gfx_addr);
-	DRM_DEBUG_DRIVER(I915_DRV, "load hws at %p\n",
+	DRM_DEBUG_DRIVER("load hws at %p\n",
 			dev_priv->hw_status_page);
 	return 0;
 }
@@ -1321,7 +1316,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 {
 	struct drm_i915_file_private *i915_file_priv;

-	DRM_DEBUG_DRIVER(I915_DRV, "\n");
+	DRM_DEBUG_DRIVER("\n");
 	i915_file_priv = (struct drm_i915_file_private *)
 	    kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3f445a80c552..b78c550ae866 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -38,16 +38,6 @@
 #include "i915_drv.h"
 #include <linux/acpi.h>

-#define I915_LVDS "i915_lvds"
-
-/*
- * the following four scaling options are defined.
- * #define DRM_MODE_SCALE_NON_GPU 0
- * #define DRM_MODE_SCALE_FULLSCREEN 1
- * #define DRM_MODE_SCALE_NO_SCALE 2
- * #define DRM_MODE_SCALE_ASPECT 3
- */
-
 /* Private structure for the integrated LVDS support */
 struct intel_lvds_priv {
 	int fitting_mode;
@@ -336,7 +326,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	I915_WRITE(BCLRPAT_B, 0);

 	switch (lvds_priv->fitting_mode) {
-	case DRM_MODE_SCALE_NO_SCALE:
+	case DRM_MODE_SCALE_CENTER:
 		/*
 		 * For centered modes, we have to calculate border widths &
 		 * heights and modify the values programmed into the CRTC.
@@ -672,9 +662,8 @@ static int intel_lvds_set_property(struct drm_connector *connector,
 	    connector->encoder) {
 		struct drm_crtc *crtc = connector->encoder->crtc;
 		struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
-		if (value == DRM_MODE_SCALE_NON_GPU) {
-			DRM_DEBUG_KMS(I915_LVDS,
-					"non_GPU property is unsupported\n");
+		if (value == DRM_MODE_SCALE_NONE) {
+			DRM_DEBUG_KMS("no scaling not supported\n");
 			return 0;
 		}
 		if (lvds_priv->fitting_mode == value) {
@@ -731,8 +720,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {

 static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
 {
-	DRM_DEBUG_KMS(I915_LVDS,
-		      "Skipping LVDS initialization for %s\n", id->ident);
+	DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
 	return 1;
 }

@@ -1025,7 +1013,7 @@ out:
 	return;

 failed:
-	DRM_DEBUG_KMS(I915_LVDS, "No LVDS modes found, disabling.\n");
+	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
 	if (intel_output->ddc_bus)
 		intel_i2c_destroy(intel_output->ddc_bus);
 	drm_connector_cleanup(connector);
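From userspace the fitting mode is driven through the generic scaling connector property; a hedged libdrm sketch (the property and connector IDs would come from drmModeGetConnector(), and the enum values mirror the kernel's reworked definitions rather than anything this patch exports):

```c
#include <stdint.h>
#include <xf86drmMode.h>

/* mirrors the kernel's reworked scaling enum */
#define EXAMPLE_SCALE_NONE	0	/* rejected by the i915 LVDS handler above */
#define EXAMPLE_SCALE_CENTER	2	/* borders instead of scaling */

static int example_set_centered_lvds(int fd, uint32_t connector_id,
				     uint32_t scaling_prop_id)
{
	return drmModeConnectorSetProperty(fd, connector_id,
					   scaling_prop_id,
					   EXAMPLE_SCALE_CENTER);
}
```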
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 5371d9332554..91238f0c39a5 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -37,7 +37,6 @@
 #include "intel_sdvo_regs.h"

 #undef SDVO_DEBUG
-#define I915_SDVO "i915_sdvo"

 struct intel_sdvo_priv {
 	u8 slave_addr;
@@ -188,7 +187,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
 		return true;
 	}

-	DRM_DEBUG("i2c transfer returned %d\n", ret);
+	DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
 	return false;
 }

@@ -298,7 +297,7 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
 	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
 	int i;

-	DRM_DEBUG_KMS(I915_SDVO, "%s: W: %02X ",
+	DRM_DEBUG_KMS("%s: W: %02X ",
 				SDVO_NAME(sdvo_priv), cmd);
 	for (i = 0; i < args_len; i++)
 		DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
@@ -351,7 +350,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
 	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
 	int i;

-	DRM_DEBUG_KMS(I915_SDVO, "%s: R: ", SDVO_NAME(sdvo_priv));
+	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
 	for (i = 0; i < response_len; i++)
 		DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
 	for (; i < 8; i++)
@@ -668,10 +667,10 @@ static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
 	status = intel_sdvo_read_response(intel_output, &response, 1);

 	if (status != SDVO_CMD_STATUS_SUCCESS) {
-		DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
+		DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
 		return SDVO_CLOCK_RATE_MULT_1X;
 	} else {
-		DRM_DEBUG("Current clock rate multiplier: %d\n", response);
+		DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
 	}

 	return response;
@@ -952,14 +951,14 @@ static void intel_sdvo_set_tv_format(struct intel_output *output)
 	format = &sdvo_priv->tv_format;
 	memset(&unset, 0, sizeof(unset));
 	if (memcmp(format, &unset, sizeof(*format))) {
-		DRM_DEBUG("%s: Choosing default TV format of NTSC-M\n",
+		DRM_DEBUG_KMS("%s: Choosing default TV format of NTSC-M\n",
 				SDVO_NAME(sdvo_priv));
 		format->ntsc_m = 1;
 		intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, format,
 				sizeof(*format));
 		status = intel_sdvo_read_response(output, NULL, 0);
 		if (status != SDVO_CMD_STATUS_SUCCESS)
-			DRM_DEBUG("%s: Failed to set TV format\n",
+			DRM_DEBUG_KMS("%s: Failed to set TV format\n",
 					SDVO_NAME(sdvo_priv));
 	}
 }
@@ -1230,8 +1229,8 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
 		 * a given it the status is a success, we succeeded.
 		 */
 		if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
-			DRM_DEBUG("First %s output reported failure to sync\n",
-				   SDVO_NAME(sdvo_priv));
+			DRM_DEBUG_KMS("First %s output reported failure to "
+					"sync\n", SDVO_NAME(sdvo_priv));
 		}

 		if (0)
@@ -1326,8 +1325,8 @@ static void intel_sdvo_restore(struct drm_connector *connector)
 		intel_wait_for_vblank(dev);
 		status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2);
 		if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
-			DRM_DEBUG("First %s output reported failure to sync\n",
-				   SDVO_NAME(sdvo_priv));
+			DRM_DEBUG_KMS("First %s output reported failure to "
+					"sync\n", SDVO_NAME(sdvo_priv));
 	}

 	intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs);
@@ -1405,7 +1404,7 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
 	u8 response[2];
 	u8 status;
 	struct intel_output *intel_output;
-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");

 	if (!connector)
 		return 0;
@@ -1519,7 +1518,7 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 	intel_sdvo_write_cmd(intel_output,
 			     SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
 	status = intel_sdvo_read_response(intel_output, &response, 2);

-	DRM_DEBUG("SDVO response %d %d\n", response & 0xff, response >> 8);
+	DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);

 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return connector_status_unknown;
@@ -2003,10 +2002,9 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
 		sdvo_priv->controlled_output = 0;
 		memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
-		DRM_DEBUG_KMS(I915_SDVO,
-			      "%s: Unknown SDVO output type (0x%02x%02x)\n",
-			      SDVO_NAME(sdvo_priv),
-			      bytes[0], bytes[1]);
+		DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+			      SDVO_NAME(sdvo_priv),
+			      bytes[0], bytes[1]);
 		ret = false;
 	}

@@ -2055,8 +2053,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
 	/* Read the regs to test if we can talk to the device */
 	for (i = 0; i < 0x40; i++) {
 		if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
-			DRM_DEBUG_KMS(I915_SDVO,
-					"No SDVO device found on SDVO%c\n",
+			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
 					output_device == SDVOB ? 'B' : 'C');
 			goto err_i2c;
 		}
@@ -2079,7 +2076,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
 	if (intel_sdvo_output_setup(intel_output,
 				    sdvo_priv->caps.output_flags) != true) {
-		DRM_DEBUG("SDVO output failed to setup on SDVO%c\n",
+		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
 			  output_device == SDVOB ? 'B' : 'C');
 		goto err_i2c;
 	}
@@ -2112,7 +2109,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
 				&sdvo_priv->pixel_clock_max);

-	DRM_DEBUG_KMS(I915_SDVO, "%s device VID/DID: %02X:%02X.%02X, "
+	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
 			"clock range %dMHz - %dMHz, "
 			"input 1: %c, input 2: %c, "
 			"output 1: %c, output 2: %c\n",
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 473e4775dc5a..10e8af6bb456 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -37,6 +37,7 @@
  * TTM.
  */
 struct radeon_mman {
+	struct ttm_bo_global_ref	bo_global_ref;
 	struct ttm_global_reference	mem_global_ref;
 	bool				mem_global_referenced;
 	struct ttm_bo_device		bdev;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 15c3531377ed..6f5ad0802fcd 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -77,9 +77,25 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
 	global_ref->release = &radeon_ttm_mem_global_release;
 	r = ttm_global_item_ref(global_ref);
 	if (r != 0) {
-		DRM_ERROR("Failed referencing a global TTM memory object.\n");
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
 		return r;
 	}
+
+	rdev->mman.bo_global_ref.mem_glob =
+		rdev->mman.mem_global_ref.object;
+	global_ref = &rdev->mman.bo_global_ref.ref;
+	global_ref->global_type = TTM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = ttm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		ttm_global_item_unref(&rdev->mman.mem_global_ref);
+		return r;
+	}
+
 	rdev->mman.mem_global_referenced = true;
 	return 0;
 }
@@ -87,6 +103,7 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
 static void radeon_ttm_global_fini(struct radeon_device *rdev)
 {
 	if (rdev->mman.mem_global_referenced) {
+		ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
 		ttm_global_item_unref(&rdev->mman.mem_global_ref);
 		rdev->mman.mem_global_referenced = false;
 	}
@@ -286,9 +303,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
 out_cleanup:
 	if (tmp_mem.mm_node) {
-		spin_lock(&rdev->mman.bdev.lru_lock);
+		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+
+		spin_lock(&glob->lru_lock);
 		drm_mm_put_block(tmp_mem.mm_node);
-		spin_unlock(&rdev->mman.bdev.lru_lock);
+		spin_unlock(&glob->lru_lock);
 		return r;
 	}
 	return r;
@@ -323,9 +342,11 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	}
 out_cleanup:
 	if (tmp_mem.mm_node) {
-		spin_lock(&rdev->mman.bdev.lru_lock);
+		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+
+		spin_lock(&glob->lru_lock);
 		drm_mm_put_block(tmp_mem.mm_node);
-		spin_unlock(&rdev->mman.bdev.lru_lock);
+		spin_unlock(&glob->lru_lock);
 		return r;
 	}
 	return r;
@@ -446,7 +467,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	}
 	/* No others user of address space so set it to 0 */
 	r = ttm_bo_device_init(&rdev->mman.bdev,
-			       rdev->mman.mem_global_ref.object,
+			       rdev->mman.bo_global_ref.ref.object,
 			       &radeon_bo_driver,
 			       DRM_FILE_PAGE_OFFSET,
 			       rdev->need_dma32);
 	if (r) {
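Every TTM driver now performs the same two-stage global setup radeon does here: reference the memory-accounting global first, then the BO global built on top of it. A generic sketch of the pattern (the example_device structure and the example_* callback names are invented for illustration):

```c
struct example_device {
	struct ttm_bo_global_ref bo_global_ref;
	struct ttm_global_reference mem_global_ref;
};

static int example_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void example_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int example_ttm_global_init(struct example_device *edev)
{
	struct ttm_global_reference *global_ref;
	int r;

	/* stage 1: the memory accounting global */
	global_ref = &edev->mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &example_mem_global_init;
	global_ref->release = &example_mem_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r)
		return r;

	/* stage 2: the BO global hangs off the memory global above */
	edev->bo_global_ref.mem_glob = edev->mem_global_ref.object;
	global_ref = &edev->bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r)
		ttm_global_item_unref(&edev->mem_global_ref);
	return r;
}
```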
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c2b0d710d10f..fa87ccbcc6ce 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -44,6 +44,39 @@
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);

+static void ttm_bo_global_kobj_release(struct kobject *kobj);
+
+static struct attribute ttm_bo_count = {
+	.name = "bo_count",
+	.mode = S_IRUGO
+};
+
+static ssize_t ttm_bo_global_show(struct kobject *kobj,
+				  struct attribute *attr,
+				  char *buffer)
+{
+	struct ttm_bo_global *glob =
+		container_of(kobj, struct ttm_bo_global, kobj);
+
+	return snprintf(buffer, PAGE_SIZE, "%lu\n",
+			(unsigned long) atomic_read(&glob->bo_count));
+}
+
+static struct attribute *ttm_bo_global_attrs[] = {
+	&ttm_bo_count,
+	NULL
+};
+
+static struct sysfs_ops ttm_bo_global_ops = {
+	.show = &ttm_bo_global_show
+};
+
+static struct kobj_type ttm_bo_glob_kobj_type = {
+	.release = &ttm_bo_global_kobj_release,
+	.sysfs_ops = &ttm_bo_global_ops,
+	.default_attrs = ttm_bo_global_attrs
+};
+
 static inline uint32_t ttm_bo_type_flags(unsigned type)
 {
@@ -66,10 +99,11 @@ static void ttm_bo_release_list(struct kref *list_kref)
	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
+	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
-		ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
+		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
		kfree(bo);
	}
 }
@@ -106,7 +140,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
-			list_add_tail(&bo->swap, &bdev->swap_lru);
+			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
@@ -141,7 +175,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible, bool no_wait,
			  bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
@@ -153,9 +187,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
		if (no_wait)
			return -EBUSY;

-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);

		if (unlikely(ret))
			return ret;
@@ -181,16 +215,16 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible, bool no_wait,
		   bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -200,13 +234,13 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;

-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
@@ -217,6 +251,7 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;
@@ -232,14 +267,14 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
		page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
	case ttm_bo_type_kernel:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-					page_flags, bdev->dummy_read_page);
+					page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_user:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
-					bdev->dummy_read_page);
+					glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
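As the hunks above show, all LRU-list manipulation and the reserve fastpath now nest under the per-global lru_lock instead of a per-device lock. Callers normally never take that lock directly; a short sketch of the resulting rule, assuming an already initialized buffer object:

/* Sketch only: ttm_bo_reserve() takes bo->glob->lru_lock internally
 * and removes the object from the global LRU lists on success. */
static int example_reserve_touch(struct ttm_buffer_object *bo)
{
	int ret = ttm_bo_reserve(bo, true, false, false, 0);

	if (ret == 0)
		ttm_bo_unreserve(bo);	/* puts it back on the LRU */
	return ret;
}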
@@ -360,6 +395,7 @@ out_err:
 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 {
	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;
@@ -371,7 +407,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)

	spin_unlock(&bo->lock);

-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
	BUG_ON(ret);
	if (bo->ttm)
@@ -386,7 +422,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
		bo->mem.mm_node = NULL;
	}
	put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);

	atomic_set(&bo->reserved, 0);
@@ -396,14 +432,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
		return 0;
	}

-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
@@ -413,7 +449,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
		ret = 0;

	} else {
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}
@@ -428,11 +464,12 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)

 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
+	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry, *nentry;
	struct list_head *list, *next;
	int ret;

-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
	list_for_each_safe(list, next, &bdev->ddestroy) {
		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
		nentry = NULL;
@@ -449,16 +486,16 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
		}
		kref_get(&entry->list_kref);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);

		if (nentry) {
			bool next_onlist = !list_empty(next);
-			spin_unlock(&bdev->lru_lock);
+			spin_unlock(&glob->lru_lock);
			kref_put(&nentry->list_kref, ttm_bo_release_list);
-			spin_lock(&bdev->lru_lock);
+			spin_lock(&glob->lru_lock);
			/*
			 * Someone might have raced us and removed the
			 * next entry from the list. We don't bother restarting
@@ -472,7 +509,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
			break;
	}
	ret = !list_empty(&bdev->ddestroy);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);

	return ret;
 }
@@ -522,6 +559,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 {
	int ret = 0;
	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
	struct ttm_mem_reg evict_mem;
	uint32_t proposed_placement;
@@ -570,12 +608,12 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
		goto out;
	}

-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
	if (evict_mem.mm_node) {
		drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
	bo->evicted = true;
 out:
	return ret;
@@ -590,6 +628,7 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
				  uint32_t mem_type,
				  bool interruptible, bool no_wait)
 {
+	struct ttm_bo_global *glob = bdev->glob;
	struct drm_mm_node *node;
	struct ttm_buffer_object *entry;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -603,7 +642,7 @@ retry_pre_get:
	if (unlikely(ret != 0))
		return ret;

-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
@@ -624,7 +663,7 @@ retry_pre_get:
		if (likely(ret == 0))
			put_count = ttm_bo_del_from_lru(entry);

-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);

		if (unlikely(ret != 0))
			return ret;
@@ -640,21 +679,21 @@ retry_pre_get:
		if (ret)
			return ret;

-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
	} while (1);

	if (!node) {
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
		return -ENOMEM;
	}

	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
	if (unlikely(!node)) {
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
		goto retry_pre_get;
	}

-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
@@ -723,6 +762,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     bool interruptible, bool no_wait)
 {
	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
	struct ttm_mem_type_manager *man;

	uint32_t num_prios = bdev->driver->num_mem_type_prio;
@@ -762,20 +802,20 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			if (unlikely(ret))
				return ret;

-			spin_lock(&bdev->lru_lock);
+			spin_lock(&glob->lru_lock);
			node = drm_mm_search_free(&man->manager,
						  mem->num_pages,
						  mem->page_alignment, 1);
			if (unlikely(!node)) {
-				spin_unlock(&bdev->lru_lock);
+				spin_unlock(&glob->lru_lock);
				break;
			}
			node = drm_mm_get_block_atomic(node,
						       mem->num_pages,
						       mem->page_alignment);
-			spin_unlock(&bdev->lru_lock);
+			spin_unlock(&glob->lru_lock);
		} while (!node);
	}
	if (node)
@@ -848,7 +888,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
		       uint32_t proposed_placement,
		       bool interruptible, bool no_wait)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	struct ttm_mem_reg mem;
@@ -884,9 +924,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,

 out_unlock:
	if (ret && mem.mm_node) {
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
		drm_mm_put_block(mem.mm_node);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
	}
	return ret;
 }
@@ -1022,6 +1062,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
+	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.mem_type = TTM_PL_SYSTEM;
@@ -1034,6 +1075,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;
+	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, flags, 0ULL);
	if (unlikely(ret != 0))
@@ -1072,13 +1114,13 @@ out_err:
 }
 EXPORT_SYMBOL(ttm_buffer_object_init);

-static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
+static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
				 unsigned long num_pages)
 {
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

-	return bdev->ttm_bo_size + 2 * page_array_size;
+	return glob->ttm_bo_size + 2 * page_array_size;
 }

 int ttm_buffer_object_create(struct ttm_bo_device *bdev,
@@ -1093,18 +1135,18 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 {
	struct ttm_buffer_object *bo;
	int ret;
-	struct ttm_mem_global *mem_glob = bdev->mem_glob;
+	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

	size_t acc_size =
-	    ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
+	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
-		ttm_mem_global_free(mem_glob, acc_size, false);
+		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}
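ttm_buffer_object_create() above charges the object's bookkeeping footprint (acc_size) against the accounting global before allocating anything, and releases exactly the same amount on every failure path. A reduced sketch of that reserve-then-allocate pattern; example_create is hypothetical, and ttm_bo_size() is file-local to ttm_bo.c, shown here only to illustrate the arithmetic:

static int example_create(struct ttm_bo_device *bdev, size_t size)
{
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size =
	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	struct ttm_buffer_object *bo;
	int ret;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret)
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo) {
		/* undo the accounting charge on failure */
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}
	/* ... hand bo to ttm_buffer_object_init() as above ... */
	return 0;
}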
@@ -1150,6 +1192,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   struct list_head *head,
				   unsigned mem_type, bool allow_errors)
 {
+	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry;
	int ret;
	int put_count;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
	while (!list_empty(head)) {
		entry = list_first_entry(head, struct ttm_buffer_object, lru);
		kref_get(&entry->list_kref);
		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
		put_count = ttm_bo_del_from_lru(entry);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);
		BUG_ON(ret);
		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
		ttm_bo_unreserve(entry);
		kref_put(&entry->list_kref, ttm_bo_release_list);
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
	}
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);

	return 0;
 }

 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
+	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;
@@ -1204,13 +1248,13 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);

-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
		if (drm_mm_clean(&man->manager))
			drm_mm_takedown(&man->manager);
		else
			ret = -EBUSY;

-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
	}

	return ret;
@@ -1284,11 +1328,83 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 }
 EXPORT_SYMBOL(ttm_bo_init_mm);

+static void ttm_bo_global_kobj_release(struct kobject *kobj)
+{
+	struct ttm_bo_global *glob =
+		container_of(kobj, struct ttm_bo_global, kobj);
+
+	printk(KERN_INFO TTM_PFX "Freeing bo global.\n");
+	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
+	__free_page(glob->dummy_read_page);
+	kfree(glob);
+}
+
+void ttm_bo_global_release(struct ttm_global_reference *ref)
+{
+	struct ttm_bo_global *glob = ref->object;
+
+	kobject_del(&glob->kobj);
+	kobject_put(&glob->kobj);
+}
+EXPORT_SYMBOL(ttm_bo_global_release);
+
+int ttm_bo_global_init(struct ttm_global_reference *ref)
+{
+	struct ttm_bo_global_ref *bo_ref =
+		container_of(ref, struct ttm_bo_global_ref, ref);
+	struct ttm_bo_global *glob = ref->object;
+	int ret;
+
+	mutex_init(&glob->device_list_mutex);
+	spin_lock_init(&glob->lru_lock);
+	glob->mem_glob = bo_ref->mem_glob;
+	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+
+	if (unlikely(glob->dummy_read_page == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_drp;
+	}
+
+	INIT_LIST_HEAD(&glob->swap_lru);
+	INIT_LIST_HEAD(&glob->device_list);
+
+	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
+	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
+	if (unlikely(ret != 0)) {
+		printk(KERN_ERR TTM_PFX
+		       "Could not register buffer object swapout.\n");
+		goto out_no_shrink;
+	}
+
+	glob->ttm_bo_extra_size =
+		ttm_round_pot(sizeof(struct ttm_tt)) +
+		ttm_round_pot(sizeof(struct ttm_backend));
+
+	glob->ttm_bo_size = glob->ttm_bo_extra_size +
+		ttm_round_pot(sizeof(struct ttm_buffer_object));
+
+	atomic_set(&glob->bo_count, 0);
+
+	kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
+	ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
+	if (unlikely(ret != 0))
+		kobject_put(&glob->kobj);
+	return ret;
+out_no_shrink:
+	__free_page(glob->dummy_read_page);
+out_no_drp:
+	kfree(glob);
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_global_init);
+
+
 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
+	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
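ttm_bo_global_init() above is meant to be driven through struct ttm_bo_global_ref, which carries the mem_glob pointer that the init callback picks up via container_of(). A sketch of the wiring from a driver's perspective, assuming the memory global was referenced first; example_* names are illustrative:

static struct ttm_bo_global_ref example_bo_ref;

static int example_take_bo_global(struct ttm_global_reference *mem_ref)
{
	/* The bo global needs the already-referenced mem global. */
	example_bo_ref.mem_glob = mem_ref->object;
	example_bo_ref.ref.global_type = TTM_GLOBAL_TTM_BO;
	example_bo_ref.ref.size = sizeof(struct ttm_bo_global);
	example_bo_ref.ref.init = &ttm_bo_global_init;
	example_bo_ref.ref.release = &ttm_bo_global_release;
	return ttm_global_item_ref(&example_bo_ref.ref);
}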
@@ -1304,100 +1420,76 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
		}
	}

+	mutex_lock(&glob->device_list_mutex);
+	list_del(&bdev->device_list);
+	mutex_unlock(&glob->device_list_mutex);
+
	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);

-	ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

-	__free_page(bdev->dummy_read_page);
-
	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_release);

-/*
- * This function is intended to be called on drm driver load.
- * If you decide to call it from firstopen, you must protect the call
- * from a potentially racing ttm_bo_driver_finish in lastclose.
- * (This may happen on X server restart).
- */
-
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
-		       struct ttm_mem_global *mem_glob,
-		       struct ttm_bo_driver *driver, uint64_t file_page_offset,
+		       struct ttm_bo_global *glob,
+		       struct ttm_bo_driver *driver,
+		       uint64_t file_page_offset,
		       bool need_dma32)
 {
	int ret = -EINVAL;

-	bdev->dummy_read_page = NULL;
	rwlock_init(&bdev->vm_lock);
-	spin_lock_init(&bdev->lru_lock);
	bdev->driver = driver;
-	bdev->mem_glob = mem_glob;
	memset(bdev->man, 0, sizeof(bdev->man));

-	bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
-	if (unlikely(bdev->dummy_read_page == NULL)) {
-		ret = -ENOMEM;
-		goto out_err0;
-	}
-
	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
	if (unlikely(ret != 0))
-		goto out_err1;
+		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
-		goto out_err2;
+		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
-	INIT_LIST_HEAD(&bdev->swap_lru);
	bdev->dev_mapping = NULL;
+	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;

-	ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
-	ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
-	if (unlikely(ret != 0)) {
-		printk(KERN_ERR TTM_PFX
-		       "Could not register buffer object swapout.\n");
-		goto out_err2;
-	}
-
-	bdev->ttm_bo_extra_size =
-		ttm_round_pot(sizeof(struct ttm_tt)) +
-		ttm_round_pot(sizeof(struct ttm_backend));
-
-	bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
-		ttm_round_pot(sizeof(struct ttm_buffer_object));
+	mutex_lock(&glob->device_list_mutex);
+	list_add_tail(&bdev->device_list, &glob->device_list);
+	mutex_unlock(&glob->device_list_mutex);

	return 0;
-out_err2:
+out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
-out_err1:
-	__free_page(bdev->dummy_read_page);
-out_err0:
+out_no_sys:
	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_init);
@@ -1647,21 +1739,21 @@ void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)

 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 {
-	struct ttm_bo_device *bdev =
-	    container_of(shrink, struct ttm_bo_device, shrink);
+	struct ttm_bo_global *glob =
+	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
-		if (unlikely(list_empty(&bdev->swap_lru))) {
-			spin_unlock(&bdev->lru_lock);
+		if (unlikely(list_empty(&glob->swap_lru))) {
+			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

-		bo = list_first_entry(&bdev->swap_lru,
+		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);
@@ -1673,16 +1765,16 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
-			spin_unlock(&bdev->lru_lock);
+			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
-			spin_lock(&bdev->lru_lock);
+			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -1736,6 +1828,6 @@ out:

 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 {
-	while (ttm_bo_swapout(&bdev->shrink) == 0)
+	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ad4ada07c6cf..c70927ecda21 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -41,9 +41,9 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node) {
-		spin_lock(&bo->bdev->lru_lock);
+		spin_lock(&bo->glob->lru_lock);
		drm_mm_put_block(old_mem->mm_node);
-		spin_unlock(&bo->bdev->lru_lock);
+		spin_unlock(&bo->glob->lru_lock);
	}
	old_mem->mm_node = NULL;
 }
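With the hunks above, the shrink hook moves from the device to the global: ttm_bo_swapout() is installed once per ttm_bo_global (in ttm_bo_global_init) and walks the global swap LRU. A sketch of the callback contract only; TTM currently supports a single shrink callback, so a driver-defined hook like this is an illustration of the interface, not upstream usage:

/* Illustrative ttm_mem_shrink callback. ttm_shrink() keeps invoking
 * do_shrink() while any zone remains above its target, and stops on
 * a nonzero return. */
static int example_do_shrink(struct ttm_mem_shrink *shrink)
{
	/* Swap out or evict one object here; return 0 on progress. */
	return 0;
}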
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
index 0b14eb1972b8..541744d00d3e 100644
--- a/drivers/gpu/drm/ttm/ttm_global.c
+++ b/drivers/gpu/drm/ttm/ttm_global.c
@@ -71,7 +71,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
	mutex_lock(&item->mutex);
	if (item->refcount == 0) {
-		item->object = kmalloc(ref->size, GFP_KERNEL);
+		item->object = kzalloc(ref->size, GFP_KERNEL);
		if (unlikely(item->object == NULL)) {
			ret = -ENOMEM;
			goto out_err;
@@ -89,7 +89,6 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
	mutex_unlock(&item->mutex);
	return 0;
 out_err:
-	kfree(item->object);
	mutex_unlock(&item->mutex);
	item->object = NULL;
	return ret;
@@ -105,7 +104,6 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
	BUG_ON(ref->object != item->object);
	if (--item->refcount == 0) {
		ref->release(ref);
-		kfree(item->object);
		item->object = NULL;
	}
	mutex_unlock(&item->mutex);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 87323d4ff68d..62fb5cf0899e 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -26,15 +26,180 @@
 **************************************************************************/

 #include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/mm.h>
 #include <linux/module.h>

-#define TTM_PFX "[TTM] "
 #define TTM_MEMORY_ALLOC_RETRIES 4

+struct ttm_mem_zone {
+	struct kobject kobj;
+	struct ttm_mem_global *glob;
+	const char *name;
+	uint64_t zone_mem;
+	uint64_t emer_mem;
+	uint64_t max_mem;
+	uint64_t swap_limit;
+	uint64_t used_mem;
+};
+
+static struct attribute ttm_mem_sys = {
+	.name = "zone_memory",
+	.mode = S_IRUGO
+};
+static struct attribute ttm_mem_emer = {
+	.name = "emergency_memory",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_mem_max = {
+	.name = "available_memory",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_mem_swap = {
+	.name = "swap_limit",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_mem_used = {
+	.name = "used_memory",
+	.mode = S_IRUGO
+};
+
+static void ttm_mem_zone_kobj_release(struct kobject *kobj)
+{
+	struct ttm_mem_zone *zone =
+		container_of(kobj, struct ttm_mem_zone, kobj);
+
+	printk(KERN_INFO TTM_PFX
+	       "Zone %7s: Used memory at exit: %llu kiB.\n",
+	       zone->name, (unsigned long long) zone->used_mem >> 10);
+	kfree(zone);
+}
+
+static ssize_t ttm_mem_zone_show(struct kobject *kobj,
+				 struct attribute *attr,
+				 char *buffer)
+{
+	struct ttm_mem_zone *zone =
+		container_of(kobj, struct ttm_mem_zone, kobj);
+	uint64_t val = 0;
+
+	spin_lock(&zone->glob->lock);
+	if (attr == &ttm_mem_sys)
+		val = zone->zone_mem;
+	else if (attr == &ttm_mem_emer)
+		val = zone->emer_mem;
+	else if (attr == &ttm_mem_max)
+		val = zone->max_mem;
+	else if (attr == &ttm_mem_swap)
+		val = zone->swap_limit;
+	else if (attr == &ttm_mem_used)
+		val = zone->used_mem;
+	spin_unlock(&zone->glob->lock);
+
+	return snprintf(buffer, PAGE_SIZE, "%llu\n",
+			(unsigned long long) val >> 10);
+}
+
+static void ttm_check_swapping(struct ttm_mem_global *glob);
+
+static ssize_t ttm_mem_zone_store(struct kobject *kobj,
+				  struct attribute *attr,
+				  const char *buffer,
+				  size_t size)
+{
+	struct ttm_mem_zone *zone =
+		container_of(kobj, struct ttm_mem_zone, kobj);
+	int chars;
+	unsigned long val;
+	uint64_t val64;
+
+	chars = sscanf(buffer, "%lu", &val);
+	if (chars == 0)
+		return size;
+
+	val64 = val;
+	val64 <<= 10;
+
+	spin_lock(&zone->glob->lock);
+	if (val64 > zone->zone_mem)
+		val64 = zone->zone_mem;
+	if (attr == &ttm_mem_emer) {
+		zone->emer_mem = val64;
+		if (zone->max_mem > val64)
+			zone->max_mem = val64;
+	} else if (attr == &ttm_mem_max) {
+		zone->max_mem = val64;
+		if (zone->emer_mem < val64)
+			zone->emer_mem = val64;
+	} else if (attr == &ttm_mem_swap)
+		zone->swap_limit = val64;
+	spin_unlock(&zone->glob->lock);
+
+	ttm_check_swapping(zone->glob);
+
+	return size;
+}
+
+static struct attribute *ttm_mem_zone_attrs[] = {
+	&ttm_mem_sys,
+	&ttm_mem_emer,
+	&ttm_mem_max,
+	&ttm_mem_swap,
+	&ttm_mem_used,
+	NULL
+};
+
+static struct sysfs_ops ttm_mem_zone_ops = {
+	.show = &ttm_mem_zone_show,
+	.store = &ttm_mem_zone_store
+};
+
+static struct kobj_type ttm_mem_zone_kobj_type = {
+	.release = &ttm_mem_zone_kobj_release,
+	.sysfs_ops = &ttm_mem_zone_ops,
+	.default_attrs = ttm_mem_zone_attrs,
+};
+
+static void ttm_mem_global_kobj_release(struct kobject *kobj)
+{
+	struct ttm_mem_global *glob =
+		container_of(kobj, struct ttm_mem_global, kobj);
+
+	kfree(glob);
+}
+
+static struct kobj_type ttm_mem_glob_kobj_type = {
+	.release = &ttm_mem_global_kobj_release,
+};
+
+static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
+					bool from_wq, uint64_t extra)
+{
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+	uint64_t target;
+
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+
+		if (from_wq)
+			target = zone->swap_limit;
+		else if (capable(CAP_SYS_ADMIN))
+			target = zone->emer_mem;
+		else
+			target = zone->max_mem;
+
+		target = (extra > target) ? 0ULL : target;
+
+		if (zone->used_mem > target)
+			return true;
+	}
+	return false;
+}
+
 /**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
@@ -42,34 +207,17 @@
 * many threads may try to swap out at any given time.
 */

-static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
+static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra)
 {
	int ret;
	struct ttm_mem_shrink *shrink;
-	uint64_t target;
-	uint64_t total_target;

	spin_lock(&glob->lock);
	if (glob->shrink == NULL)
		goto out;

-	if (from_workqueue) {
-		target = glob->swap_limit;
-		total_target = glob->total_memory_swap_limit;
-	} else if (capable(CAP_SYS_ADMIN)) {
-		total_target = glob->emer_total_memory;
-		target = glob->emer_memory;
-	} else {
-		total_target = glob->max_total_memory;
-		target = glob->max_memory;
-	}
-
-	total_target = (extra >= total_target) ? 0 : total_target - extra;
-	target = (extra >= target) ? 0 : target - extra;
-
-	while (glob->used_memory > target ||
-	       glob->used_total_memory > total_target) {
+	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		shrink = glob->shrink;
		spin_unlock(&glob->lock);
		ret = shrink->do_shrink(shrink);
@@ -81,6 +229,8 @@ out:
	spin_unlock(&glob->lock);
 }

+
+
 static void ttm_shrink_work(struct work_struct *work)
 {
	struct ttm_mem_global *glob =
@@ -89,63 +239,178 @@ static void ttm_shrink_work(struct work_struct *work)
	ttm_shrink(glob, true, 0ULL);
 }

+static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
+				    const struct sysinfo *si)
+{
+	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	uint64_t mem;
+
+	if (unlikely(!zone))
+		return -ENOMEM;
+
+	mem = si->totalram - si->totalhigh;
+	mem *= si->mem_unit;
+
+	zone->name = "kernel";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_kernel = zone;
+	glob->zones[glob->num_zones++] = zone;
+	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
+	return kobject_add(&zone->kobj, &glob->kobj, zone->name);
+}
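The zone limits all derive from the zone size by fixed shifts. A worked example of the arithmetic above, assuming a zone_mem of 8 GiB:

/*
 * Assumed zone_mem = 8 GiB:
 *   max_mem    = zone_mem >> 1                      = 4 GiB
 *   emer_mem   = (zone_mem >> 1) + (zone_mem >> 2)  = 6 GiB
 *   swap_limit = max_mem - (zone_mem >> 3)          = 3 GiB
 */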
+#ifdef CONFIG_HIGHMEM
+static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
+				     const struct sysinfo *si)
+{
+	struct ttm_mem_zone *zone;
+	uint64_t mem;
+
+	if (si->totalhigh == 0)
+		return 0;
+
+	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	if (unlikely(!zone))
+		return -ENOMEM;
+
+	mem = si->totalram;
+	mem *= si->mem_unit;
+
+	zone->name = "highmem";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_highmem = zone;
+	glob->zones[glob->num_zones++] = zone;
+	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
+	return kobject_add(&zone->kobj, &glob->kobj, zone->name);
+}
+#else
+static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
+				   const struct sysinfo *si)
+{
+	struct ttm_mem_zone *zone;
+	uint64_t mem;
+
+	mem = si->totalram;
+	mem *= si->mem_unit;
+
+	/**
+	 * No special dma32 zone needed.
+	 */
+
+	if (mem <= ((uint64_t) 1ULL << 32))
+		return 0;
+
+	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	if (unlikely(!zone))
+		return -ENOMEM;
+
+	/*
+	 * Limit max dma32 memory to 4GB for now
+	 * until we can figure out how big this
+	 * zone really is.
+	 */
+
+	mem = ((uint64_t) 1ULL << 32);
+	zone->name = "dma32";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_dma32 = zone;
+	glob->zones[glob->num_zones++] = zone;
+	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
+	return kobject_add(&zone->kobj, &glob->kobj, zone->name);
+}
+#endif
+
 int ttm_mem_global_init(struct ttm_mem_global *glob)
 {
	struct sysinfo si;
-	uint64_t mem;
+	int ret;
+	int i;
+	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	init_waitqueue_head(&glob->queue);
+	kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type);
+	ret = kobject_add(&glob->kobj,
+			  ttm_get_kobj(),
+			  "memory_accounting");
+	if (unlikely(ret != 0))
+		goto out_no_zone;

	si_meminfo(&si);

-	mem = si.totalram - si.totalhigh;
-	mem *= si.mem_unit;
-
-	glob->max_memory = mem >> 1;
-	glob->emer_memory = (mem >> 1) + (mem >> 2);
-	glob->swap_limit = glob->max_memory - (mem >> 3);
-	glob->used_memory = 0;
-	glob->used_total_memory = 0;
-	glob->shrink = NULL;
-
-	mem = si.totalram;
-	mem *= si.mem_unit;
-
-	glob->max_total_memory = mem >> 1;
-	glob->emer_total_memory = (mem >> 1) + (mem >> 2);
-
-	glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 3);
-
-	printk(KERN_INFO TTM_PFX "TTM available graphics memory: %llu MiB\n",
-	       glob->max_total_memory >> 20);
-	printk(KERN_INFO TTM_PFX "TTM available object memory: %llu MiB\n",
-	       glob->max_memory >> 20);
-
+	ret = ttm_mem_init_kernel_zone(glob, &si);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+#ifdef CONFIG_HIGHMEM
+	ret = ttm_mem_init_highmem_zone(glob, &si);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+#else
+	ret = ttm_mem_init_dma32_zone(glob, &si);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+#endif
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		printk(KERN_INFO TTM_PFX
+		       "Zone %7s: Available graphics memory: %llu kiB.\n",
+		       zone->name, (unsigned long long) zone->max_mem >> 10);
+	}
	return 0;
+out_no_zone:
+	ttm_mem_global_release(glob);
+	return ret;
 }
 EXPORT_SYMBOL(ttm_mem_global_init);

 void ttm_mem_global_release(struct ttm_mem_global *glob)
 {
-	printk(KERN_INFO TTM_PFX "Used total memory is %llu bytes.\n",
-	       (unsigned long long)glob->used_total_memory);
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		kobject_del(&zone->kobj);
+		kobject_put(&zone->kobj);
+	}
+	kobject_del(&glob->kobj);
+	kobject_put(&glob->kobj);
 }
 EXPORT_SYMBOL(ttm_mem_global_release);

-static inline void ttm_check_swapping(struct ttm_mem_global *glob)
+static void ttm_check_swapping(struct ttm_mem_global *glob)
 {
-	bool needs_swapping;
+	bool needs_swapping = false;
+	unsigned int i;
+	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
-	needs_swapping = (glob->used_memory > glob->swap_limit ||
-			  glob->used_total_memory >
-			  glob->total_memory_swap_limit);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (zone->used_mem > zone->swap_limit) {
+			needs_swapping = true;
+			break;
+		}
+	}
+
	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
@@ -153,44 +418,60 @@
 }
-void ttm_mem_global_free(struct ttm_mem_global *glob,
-			 uint64_t amount, bool himem)
+static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
+				     struct ttm_mem_zone *single_zone,
+				     uint64_t amount)
 {
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
	spin_lock(&glob->lock);
-	glob->used_total_memory -= amount;
-	if (!himem)
-		glob->used_memory -= amount;
-	wake_up_all(&glob->queue);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (single_zone && zone != single_zone)
+			continue;
+		zone->used_mem -= amount;
+	}
	spin_unlock(&glob->lock);
 }

+void ttm_mem_global_free(struct ttm_mem_global *glob,
+			 uint64_t amount)
+{
+	return ttm_mem_global_free_zone(glob, NULL, amount);
+}
+
 static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
-				  uint64_t amount, bool himem, bool reserve)
+				  struct ttm_mem_zone *single_zone,
+				  uint64_t amount, bool reserve)
 {
	uint64_t limit;
-	uint64_t lomem_limit;
	int ret = -ENOMEM;
+	unsigned int i;
+	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (single_zone && zone != single_zone)
+			continue;

-	if (capable(CAP_SYS_ADMIN)) {
-		limit = glob->emer_total_memory;
-		lomem_limit = glob->emer_memory;
-	} else {
-		limit = glob->max_total_memory;
-		lomem_limit = glob->max_memory;
-	}
+		limit = (capable(CAP_SYS_ADMIN)) ?
+			zone->emer_mem : zone->max_mem;

-	if (unlikely(glob->used_total_memory + amount > limit))
-		goto out_unlock;
-	if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
-		goto out_unlock;
+		if (zone->used_mem > limit)
+			goto out_unlock;
+	}

	if (reserve) {
-		glob->used_total_memory += amount;
-		if (!himem)
-			glob->used_memory += amount;
+		for (i = 0; i < glob->num_zones; ++i) {
+			zone = glob->zones[i];
+			if (single_zone && zone != single_zone)
+				continue;
+			zone->used_mem += amount;
+		}
	}
+
	ret = 0;
 out_unlock:
	spin_unlock(&glob->lock);
@@ -199,12 +480,17 @@ out_unlock:
	return ret;
 }

-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
-			 bool no_wait, bool interruptible, bool himem)
+
+static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
+				     struct ttm_mem_zone *single_zone,
+				     uint64_t memory,
+				     bool no_wait, bool interruptible)
 {
	int count = TTM_MEMORY_ALLOC_RETRIES;

-	while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true)
+	while (unlikely(ttm_mem_global_reserve(glob,
+					       single_zone,
+					       memory, true)
			!= 0)) {
		if (no_wait)
			return -ENOMEM;
@@ -216,6 +502,56 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
	return 0;
 }

+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+			 bool no_wait, bool interruptible)
+{
+	/**
+	 * Normal allocations of kernel memory are registered in
+	 * all zones.
+	 */
+
+	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
+					 interruptible);
+}
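Callers pair ttm_mem_global_alloc() with a ttm_mem_global_free() of the same size; the allocation retries up to TTM_MEMORY_ALLOC_RETRIES times, kicking the shrinker between attempts. A sketch of the pairing, with an arbitrary size:

static int example_account(struct ttm_mem_global *glob)
{
	int ret;

	ret = ttm_mem_global_alloc(glob, 64 * PAGE_SIZE, false, false);
	if (ret)
		return ret;	/* -ENOMEM once the retries are exhausted */
	/* ... use the accounted memory ... */
	ttm_mem_global_free(glob, 64 * PAGE_SIZE);
	return 0;
}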
+int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+			      struct page *page,
+			      bool no_wait, bool interruptible)
+{
+
+	struct ttm_mem_zone *zone = NULL;
+
+	/**
+	 * Page allocations may be registered in a single zone
+	 * only if highmem or !dma32.
+	 */
+
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page) && glob->zone_highmem != NULL)
+		zone = glob->zone_highmem;
+#else
+	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+		zone = glob->zone_kernel;
+#endif
+	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
+					 interruptible);
+}
+
+void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
+{
+	struct ttm_mem_zone *zone = NULL;
+
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page) && glob->zone_highmem != NULL)
+		zone = glob->zone_highmem;
+#else
+	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+		zone = glob->zone_kernel;
+#endif
+	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+}
+
+
 size_t ttm_round_pot(size_t size)
 {
	if ((size & (size - 1)) == 0)
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 59ce8191d584..9a6edbfeaa9e 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -29,16 +29,72 @@
 * Jerome Glisse
 */
 #include <linux/module.h>
-#include <ttm/ttm_module.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include "ttm/ttm_module.h"
+#include "drm_sysfs.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(exit_q);
+atomic_t device_released;
+
+static struct device_type ttm_drm_class_type = {
+	.name = "ttm",
+	/**
+	 * Add pm ops here.
+	 */
+};
+
+static void ttm_drm_class_device_release(struct device *dev)
+{
+	atomic_set(&device_released, 1);
+	wake_up_all(&exit_q);
+}
+
+static struct device ttm_drm_class_device = {
+	.type = &ttm_drm_class_type,
+	.release = &ttm_drm_class_device_release
+};
+
+struct kobject *ttm_get_kobj(void)
+{
+	struct kobject *kobj = &ttm_drm_class_device.kobj;
+	BUG_ON(kobj == NULL);
+	return kobj;
+}

 static int __init ttm_init(void)
 {
+	int ret;
+
+	ret = dev_set_name(&ttm_drm_class_device, "ttm");
+	if (unlikely(ret != 0))
+		return ret;
+
	ttm_global_init();
+
+	atomic_set(&device_released, 0);
+	ret = drm_class_device_register(&ttm_drm_class_device);
+	if (unlikely(ret != 0))
+		goto out_no_dev_reg;
	return 0;
+out_no_dev_reg:
+	atomic_set(&device_released, 1);
+	wake_up_all(&exit_q);
+	ttm_global_release();
+	return ret;
 }

 static void __exit ttm_exit(void)
 {
+	drm_class_device_unregister(&ttm_drm_class_device);
+
+	/**
+	 * Refuse to unload until the TTM device is released.
+	 * Not sure this is 100% needed.
+	 */
+
+	wait_event(exit_q, atomic_read(&device_released) == 1);
	ttm_global_release();
 }
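The per-page entry points in ttm_memory.c above choose the accounting zone from the page itself: highmem pages charge only the highmem zone, and on !CONFIG_HIGHMEM builds pages above the 4 GiB boundary (pfn > 0x00100000) charge only the kernel zone, bypassing dma32. A sketch of the alloc/free pairing, assuming a valid glob pointer:

static int example_account_page(struct ttm_mem_global *glob)
{
	struct page *p = alloc_page(GFP_HIGHUSER);
	int ret;

	if (!p)
		return -ENOMEM;
	ret = ttm_mem_global_alloc_page(glob, p, false, false);
	if (ret == 0) {
		/* ... the page is charged to the matching zone ... */
		ttm_mem_global_free_page(glob, p);
	}
	__free_page(p);
	return ret;
}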
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index b8b6c4a5f983..42cca5519761 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -179,7 +179,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
-		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
+		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
@@ -190,8 +190,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
	struct page *p;
-	struct ttm_bo_device *bdev = ttm->bdev;
-	struct ttm_mem_global *mem_glob = bdev->mem_glob;
+	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
@@ -200,21 +199,14 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
		if (!p)
			return NULL;

-		if (PageHighMem(p)) {
-			ret =
-			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
-						 false, false, true);
-			if (unlikely(ret != 0))
-				goto out_err;
+		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
+		if (unlikely(ret != 0))
+			goto out_err;
+
+		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
-		} else {
-			ret =
-			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
-						 false, false, false);
-			if (unlikely(ret != 0))
-				goto out_err;
+		else
			ttm->pages[++ttm->last_lomem_page] = p;
-		}
	}
	return p;
 out_err:
@@ -368,8 +360,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
			printk(KERN_ERR TTM_PFX
			       "Erroneous page count. "
			       "Leaking pages.\n");
-		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
-				    PageHighMem(cur_page));
+		ttm_mem_global_free_page(ttm->glob->mem_glob,
+					 cur_page);
		__free_page(cur_page);
	}
 }
@@ -414,7 +406,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
-	struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
+	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
@@ -424,7 +416,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
-				   false, false, false);
+				   false, false);
	if (unlikely(ret != 0))
		return ret;

@@ -435,7 +427,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
-		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
+		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

@@ -459,8 +451,7 @@ struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
	if (!ttm)
		return NULL;

-	ttm->bdev = bdev;
-
+	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
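The simplified __ttm_tt_get_page() above still fills the page array from both ends: highmem pages descend from the top via first_himem_page (initialized to num_pages), lowmem pages ascend from the bottom via last_lomem_page (initialized to -1). A sketch of just that placement step, mirroring the code above; the helper name is illustrative:

static void example_place_page(struct ttm_tt *ttm, struct page *p)
{
	/* Highmem pages fill ttm->pages[] from the top, lowmem pages
	 * from the bottom, as in __ttm_tt_get_page() above. */
	if (PageHighMem(p))
		ttm->pages[--ttm->first_himem_page] = p;
	else
		ttm->pages[++ttm->last_lomem_page] = p;
}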